Meteo Imputation
  • Library Docs

On this page

  • Training Kalman Filter for Results
    • TA - gap 12
      • TA - 96
    • All variables - 12 varying
    • All variables - 12 all gap
    • SW_IN - gap 12
      • SW_IN - 96
    • Control
      • No Control - TA 12
      • No Control - TA 96
      • No Control - All variables - 12 all gap
      • No Control - All variables - 24 all gap
      • No Control - All variables - 24 varying
      • No Control - All variables - 36 all gap
      • No Control - All variables - 48 all gap
    • LW_IN - gap 12
      • LW_IN - 96
    • Conditional

Training Kalman Filter for Results

%load_ext autoreload
%autoreload 2
from meteo_imp.kalman.fastai import *
from meteo_imp.kalman.filter import *
from meteo_imp.utils import *
from meteo_imp.data import *
from meteo_imp.gaussian import *

from fastai.tabular.learner import *
from fastai.learner import *
from fastai.callback.all import *
from fastcore.foundation import L

from meteo_imp.kalman.fastai import show_results
import pandas as pd
import numpy as np
import torch
import random

from sklearn.decomposition import PCA
# Fix RNG seeds for reproducibility before any random sampling below.
reset_seed()
# Load the big Hainich dataset and its ERA reanalysis counterpart (used as control input).
haiB = pd.read_parquet(hai_big_path)
hai_eraB = pd.read_parquet(hai_era_big_path)
list(haiB.columns)  # 7 meteorological variables
['TA', 'SW_IN', 'VPD', 'PA', 'P', 'WS', 'LW_IN']

TA - gap 12

# TA-only gaps of 12 steps inside 50-step blocks; ERA control with lag 1, 5 repetitions.
dls_TA = imp_dataloader(haiB, hai_eraB, var_sel = 'TA', block_len=50, gap_len=12, bs=20, control_lags=[1], n_rep=5).cpu()
# Kalman filter initialized with local-slope + PCA from the data.
# NOTE(review): both size args are n_vars=7; the displayed filter has 14 state dims,
# presumably because the local-slope init doubles the state — confirm in init_local_slope_pca.
model_TA = KalmanFilterSR.init_local_slope_pca(len(haiB.columns),len(haiB.columns), df_pca = haiB, pred_only_gap=True, use_conditional=True)
save_models_TA = SaveModelsBatch(times_epoch=10)  # checkpoint 10 times per epoch
items_TA = random.choices(dls_TA.valid.items, k=4)  # fixed validation items for result plots
learn_TA = Learner(dls_TA, model_TA, KalmanLoss(only_gap=True), cbs = [Float64Callback, save_models_TA], metrics=rmse_gap)
learn_TA.fit(1, 1e-3)
epoch train_loss valid_loss rmse_gap time
0 -17.669978 -18.692597 0.040505 11:47
learn_TA.recorder.plot_loss()  # training/validation loss curves

# Persist the whole model object (pickled via torch.save) for later fine-tuning.
torch.save(learn_TA.model, "trained_4_feb_TA_gap_12_v1.pickle")
learn_TA  # display the learned filter matrices (A, Q, b, H, R, d, B, m_0, P_0)

Kalman Filter (7 obs, 14 state, 14 contr)

$A$

state x_0 x_1 x_2 x_3 x_4 x_5 x_6 x_7 x_8 x_9 x_10 x_11 x_12 x_13
x_0 1.0540 0.0263 -0.1801 0.1166 -0.0537 0.0176 -0.0122 0.9143 0.0358 0.2069 -0.1760 0.0973 0.0417 -0.0181
x_1 0.0331 1.0263 -0.1854 0.1730 -0.0476 0.0026 -0.0084 0.0203 1.0045 0.2047 -0.2114 0.0382 0.1549 -0.0265
x_2 -0.0073 -0.0518 1.1940 -0.1817 0.0451 0.0350 -0.0043 -0.0032 0.0095 0.7757 0.1996 -0.0706 -0.0505 0.0121
x_3 0.0022 0.0489 -0.2066 1.1895 -0.0348 -0.0384 0.0039 0.0109 -0.0043 0.2244 0.7726 0.0558 0.0648 -0.0132
x_4 -0.0512 -0.0249 0.2067 -0.1689 1.0340 -0.0274 0.0076 0.0279 -0.0143 -0.2281 0.2212 0.9361 -0.0667 0.0287
x_5 -0.0319 -0.0366 0.1644 -0.1662 0.0304 0.9821 0.0050 -0.0164 0.0291 -0.2027 0.2111 -0.0685 0.8243 0.0220
x_6 -0.0558 0.0097 0.0369 -0.0054 0.0365 -0.0207 1.0128 -0.0391 0.0098 -0.0321 0.0340 -0.0274 -0.0362 1.1283
x_7 0.0175 -0.0001 -0.0490 0.0358 -0.0187 0.0404 -0.0147 1.0056 -0.0110 -0.1325 0.0999 0.0109 0.0650 0.0072
x_8 0.0198 0.0140 -0.0681 0.0669 -0.0049 0.0069 -0.0147 -0.0261 0.9871 -0.0827 0.0552 0.0196 0.0729 0.0038
x_9 -0.0124 -0.0219 0.0507 -0.0514 0.0192 -0.0041 0.0071 0.0212 0.0110 1.0889 -0.0715 -0.0237 -0.0894 -0.0072
x_10 0.0022 0.0194 -0.0439 0.0492 -0.0057 -0.0017 -0.0091 -0.0213 -0.0032 -0.1142 1.0904 0.0206 0.0681 0.0026
x_11 -0.0269 -0.0079 0.0741 -0.0596 0.0211 -0.0365 0.0127 0.0027 0.0129 0.0879 -0.0527 0.9728 -0.0868 -0.0051
x_12 -0.0138 -0.0181 0.0449 -0.0477 -0.0084 -0.0033 0.0205 0.0170 0.0028 0.1311 -0.1018 -0.0136 0.9346 -0.0122
x_13 -0.0303 -0.0016 0.0642 -0.0774 0.0129 -0.0009 0.0150 0.0006 0.0311 -0.0401 0.0387 -0.0224 -0.0663 1.0180

$Q$

state x_0 x_1 x_2 x_3 x_4 x_5 x_6 x_7 x_8 x_9 x_10 x_11 x_12 x_13
x_0 0.0603 0.0128 0.0115 0.0061 0.0150 -0.0126 0.0103 0.0076 -0.0200 -0.0013 -0.0074 0.0004 0.0211 0.0006
x_1 0.0128 0.1126 0.0136 -0.0148 -0.0008 -0.0037 -0.0113 0.0081 -0.0175 0.0001 0.0029 0.0102 -0.0094 0.0078
x_2 0.0115 0.0136 0.0798 0.0384 -0.0597 -0.0427 -0.0099 -0.0391 0.0153 0.0081 -0.0137 -0.0111 0.0390 -0.0067
x_3 0.0061 -0.0148 0.0384 0.0868 0.0370 0.0523 -0.0017 0.0122 -0.0219 0.0202 -0.0043 0.0327 -0.0257 0.0129
x_4 0.0150 -0.0008 -0.0597 0.0370 0.1871 0.0655 0.0114 0.0448 -0.0234 0.0061 0.0197 0.0317 -0.0447 0.0082
x_5 -0.0126 -0.0037 -0.0427 0.0523 0.0655 0.1900 0.0022 0.0155 -0.0198 -0.0082 -0.0155 0.0184 -0.0427 0.0062
x_6 0.0103 -0.0113 -0.0099 -0.0017 0.0114 0.0022 0.1411 0.0030 -0.0052 -0.0030 0.0024 0.0027 -0.0025 -0.0092
x_7 0.0076 0.0081 -0.0391 0.0122 0.0448 0.0155 0.0030 0.1724 -0.0748 0.0415 0.0064 0.0843 -0.0691 0.0358
x_8 -0.0200 -0.0175 0.0153 -0.0219 -0.0234 -0.0198 -0.0052 -0.0748 0.1626 -0.0003 -0.0328 -0.0405 0.1111 -0.0152
x_9 -0.0013 0.0001 0.0081 0.0202 0.0061 -0.0082 -0.0030 0.0415 -0.0003 0.1007 0.0491 -0.0237 -0.0595 -0.0133
x_10 -0.0074 0.0029 -0.0137 -0.0043 0.0197 -0.0155 0.0024 0.0064 -0.0328 0.0491 0.1143 0.0264 -0.0264 0.0155
x_11 0.0004 0.0102 -0.0111 0.0327 0.0317 0.0184 0.0027 0.0843 -0.0405 -0.0237 0.0264 0.2406 -0.0565 0.0455
x_12 0.0211 -0.0094 0.0390 -0.0257 -0.0447 -0.0427 -0.0025 -0.0691 0.1111 -0.0595 -0.0264 -0.0565 0.2944 -0.0166
x_13 0.0006 0.0078 -0.0067 0.0129 0.0082 0.0062 -0.0092 0.0358 -0.0152 -0.0133 0.0155 0.0455 -0.0166 0.2207

$b$

state offset
x_0 -0.0051
x_1 0.0013
x_2 -0.0071
x_3 0.0069
x_4 0.0000
x_5 0.0048
x_6 0.0097
x_7 -0.0066
x_8 -0.0045
x_9 0.0044
x_10 -0.0051
x_11 0.0065
x_12 0.0051
x_13 -0.0005

$H$

variable x_0 x_1 x_2 x_3 x_4 x_5 x_6 x_7 x_8 x_9 x_10 x_11 x_12 x_13
y_0 0.0096 -0.1188 0.6133 -0.2500 0.0814 0.0259 0.0046 -0.0204 -0.0681 0.1127 -0.0955 0.0484 -0.0042 0.0207
y_1 0.9850 0.1105 0.2278 -0.0157 0.0148 -0.1788 0.0102 0.1131 -0.0796 0.1846 -0.1719 0.0587 0.1843 -0.0333
y_2 0.0415 -0.0219 0.7257 0.6141 -0.0440 0.0144 -0.0002 0.0166 0.0696 -0.1107 0.0991 -0.0417 -0.0550 -0.0219
y_3 0.0665 0.0416 0.2722 -0.2680 -0.2304 -0.9431 0.0098 0.0517 0.0827 -0.0350 0.0336 -0.0461 0.0551 -0.0349
y_4 -0.0238 -0.0257 -0.0034 -0.0257 0.0291 0.0403 1.2371 -0.0236 0.0283 -0.0672 0.0391 -0.0402 -0.0049 -0.1147
y_5 0.1026 -0.1011 -0.2424 0.0548 1.0465 0.0073 0.0193 -0.0573 -0.0184 -0.1912 0.2090 0.0438 -0.0392 0.0262
y_6 -0.1024 -1.0913 -0.1905 0.2058 0.0868 -0.1214 0.0166 -0.0516 0.0307 0.0977 -0.0751 -0.0070 -0.0340 0.0021

$R$

variable y_0 y_1 y_2 y_3 y_4 y_5 y_6
y_0 0.0024 -0.0171 0.0077 0.0078 -0.0003 0.0176 -0.0061
y_1 -0.0171 0.1305 -0.0550 -0.0581 0.0045 -0.1186 0.0311
y_2 0.0077 -0.0550 0.0310 0.0102 0.0010 0.0652 -0.0293
y_3 0.0078 -0.0581 0.0102 0.0699 -0.0053 0.0594 0.0204
y_4 -0.0003 0.0045 0.0010 -0.0053 0.0145 0.0066 -0.0025
y_5 0.0176 -0.1186 0.0652 0.0594 0.0066 0.2732 0.0217
y_6 -0.0061 0.0311 -0.0293 0.0204 -0.0025 0.0217 0.1247

$d$

variable offset
y_0 -0.0038
y_1 -0.0016
y_2 0.0033
y_3 -0.0048
y_4 0.0000
y_5 -0.0174
y_6 -0.0078

$B$

state c_0 c_1 c_2 c_3 c_4 c_5 c_6 c_7 c_8 c_9 c_10 c_11 c_12 c_13
x_0 0.1167 -0.9769 -0.0553 0.0013 0.0001 -0.0145 -0.0575 0.1969 0.9707 -0.0257 -0.0092 0.0141 0.0467 0.0306
x_1 0.1976 0.0029 -0.0212 0.0082 -0.0222 -0.0687 0.9226 0.0571 0.0154 -0.0553 -0.0020 -0.0017 0.0169 -0.9740
x_2 -0.8669 -0.0072 -0.4643 -0.0086 0.0123 0.1030 0.1404 0.7364 0.0033 0.4928 0.0606 -0.0111 -0.0839 -0.1544
x_3 0.5027 0.0291 -0.8831 -0.0053 -0.0012 -0.0808 -0.0618 -0.3491 -0.0220 0.8552 -0.0398 0.0016 0.0779 0.0841
x_4 -0.1806 -0.0226 0.0442 0.1440 0.0033 -0.9289 0.0550 -0.1573 0.0184 0.0263 -0.1280 0.0084 0.9363 -0.0234
x_5 -0.1248 -0.0215 0.0321 0.9747 -0.0141 0.2016 0.0474 -0.1575 0.0181 0.0381 -0.9735 0.0186 -0.1620 -0.0088
x_6 -0.0989 -0.0234 0.0083 -0.0287 -0.9298 0.0117 0.0258 -0.1111 -0.0121 0.0174 0.0214 0.9692 -0.0285 0.0169
x_7 0.0672 -0.0093 -0.0187 0.0061 -0.0021 0.0007 -0.0170 0.0515 -0.0079 -0.0154 0.0087 -0.0062 -0.0037 -0.0194
x_8 0.0901 -0.0058 -0.0231 0.0070 0.0006 0.0007 -0.0120 0.0943 -0.0096 -0.0184 0.0063 0.0007 0.0153 -0.0104
x_9 -0.0884 0.0108 0.0269 -0.0104 -0.0066 -0.0051 0.0058 -0.0851 0.0131 0.0219 -0.0113 -0.0029 -0.0140 0.0054
x_10 0.0883 -0.0110 -0.0225 0.0082 0.0050 0.0123 -0.0054 0.0796 -0.0118 -0.0177 0.0095 0.0018 0.0159 -0.0074
x_11 -0.0780 0.0025 0.0207 -0.0054 0.0053 0.0003 0.0141 -0.0827 0.0086 0.0160 -0.0047 0.0042 -0.0142 0.0131
x_12 -0.0863 0.0055 0.0210 -0.0079 0.0008 -0.0018 0.0159 -0.0798 0.0033 0.0176 -0.0084 -0.0008 -0.0056 0.0178
x_13 -0.0636 -0.0051 0.0192 -0.0140 -0.0009 0.0355 0.0006 -0.0683 0.0025 0.0169 -0.0139 -0.0022 0.0233 -0.0019

$m_0$

state mean
x_0 0.0011
x_1 -0.0008
x_2 -0.0004
x_3 -0.0001
x_4 -0.0002
x_5 0.0003
x_6 -0.0000
x_7 -0.0008
x_8 0.0006
x_9 0.0003
x_10 -0.0005
x_11 -0.0003
x_12 -0.0012
x_13 -0.0000

$P_0$

state x_0 x_1 x_2 x_3 x_4 x_5 x_6 x_7 x_8 x_9 x_10 x_11 x_12 x_13
x_0 2.9565 -0.0008 0.0011 0.0001 -0.0007 -0.0009 0.0002 -0.0001 0.0011 -0.0006 0.0005 -0.0011 -0.0014 0.0004
x_1 -0.0008 2.9605 -0.0000 0.0004 0.0015 -0.0006 0.0000 0.0005 -0.0009 0.0003 -0.0002 0.0002 0.0015 -0.0003
x_2 0.0011 -0.0000 2.9587 -0.0004 -0.0004 0.0008 -0.0002 0.0003 -0.0002 -0.0003 0.0003 0.0003 -0.0002 -0.0002
x_3 0.0001 0.0004 -0.0004 2.9591 0.0011 -0.0002 -0.0003 0.0007 -0.0005 -0.0001 -0.0006 -0.0005 0.0007 0.0000
x_4 -0.0007 0.0015 -0.0004 0.0011 2.9663 -0.0018 -0.0008 -0.0026 0.0005 0.0010 -0.0003 0.0005 0.0002 -0.0005
x_5 -0.0009 -0.0006 0.0008 -0.0002 -0.0018 2.9595 0.0005 0.0017 0.0001 -0.0009 0.0002 -0.0014 -0.0003 0.0000
x_6 0.0002 0.0000 -0.0002 -0.0003 -0.0008 0.0005 2.9590 0.0010 -0.0003 -0.0002 -0.0001 -0.0002 0.0005 0.0001
x_7 -0.0001 0.0005 0.0003 0.0007 -0.0026 0.0017 0.0010 2.9589 -0.0008 0.0004 -0.0001 0.0009 0.0007 -0.0010
x_8 0.0011 -0.0009 -0.0002 -0.0005 0.0005 0.0001 -0.0003 -0.0008 2.9602 -0.0002 0.0002 -0.0001 -0.0015 0.0003
x_9 -0.0006 0.0003 -0.0003 -0.0001 0.0010 -0.0009 -0.0002 0.0004 -0.0002 2.9588 -0.0002 -0.0003 0.0005 0.0003
x_10 0.0005 -0.0002 0.0003 -0.0006 -0.0003 0.0002 -0.0001 -0.0001 0.0002 -0.0002 2.9597 0.0003 -0.0004 0.0001
x_11 -0.0011 0.0002 0.0003 -0.0005 0.0005 -0.0014 -0.0002 0.0009 -0.0001 -0.0003 0.0003 2.9583 0.0005 0.0004
x_12 -0.0014 0.0015 -0.0002 0.0007 0.0002 -0.0003 0.0005 0.0007 -0.0015 0.0005 -0.0004 0.0005 2.9619 -0.0007
x_13 0.0004 -0.0003 -0.0002 0.0000 -0.0005 0.0000 0.0001 -0.0010 0.0003 0.0003 0.0001 0.0004 -0.0007 2.9587
# Plot imputation results on the fixed validation items (gap regions only).
show_results(learn_TA, items = items_TA, control_map = control_map, hide_no_gap=True)
[MeteoImpItem(i=4518, shift=15, var_sel=['TA'], gap_len=12), MeteoImpItem(i=3825, shift=5, var_sel=['TA'], gap_len=12), MeteoImpItem(i=3706, shift=-25, var_sel=['TA'], gap_len=12), MeteoImpItem(i=4397, shift=15, var_sel=['TA'], gap_len=12)]

TA - 96

After the first training, fine-tune the model for longer gaps.

# Fine-tune the TA model on longer gaps: 96 steps inside 300-step blocks.
dls_TA96 = imp_dataloader(haiB, hai_eraB, var_sel = 'TA', block_len=300, gap_len=96, bs=20, control_lags=[1], n_rep=5).cpu()
model_TA96 = model_TA.copy()  # warm-start from the gap-12 trained weights
save_models_TA96 = SaveModelsBatch(times_epoch=5)
items_TA96 = random.choices(dls_TA96.valid.items, k=4)
learn_TA96 = Learner(dls_TA96, model_TA96, KalmanLoss(only_gap=True), cbs = [Float64Callback, save_models_TA96], metrics=rmse_gap)
# Baseline plot before fine-tuning, for comparison with the post-training plot below.
show_results(learn_TA96, items = items_TA96, control_map = control_map, hide_no_gap=True)
[MeteoImpItem(i=723, shift=90, var_sel=['TA'], gap_len=96), MeteoImpItem(i=718, shift=30, var_sel=['TA'], gap_len=96), MeteoImpItem(i=706, shift=-150, var_sel=['TA'], gap_len=96), MeteoImpItem(i=756, shift=90, var_sel=['TA'], gap_len=96)]
# Lower LR (1e-4) than the initial training since this is fine-tuning.
learn_TA96.fit(1, 1e-4)
epoch train_loss valid_loss rmse_gap time
0 -61.745678 -63.454943 0.116816 22:50
learn_TA96.recorder.plot_loss()

# Same items as the pre-training plot above, to visualize the improvement.
show_results(learn_TA96, items = items_TA96, control_map = control_map, hide_no_gap=True)
[MeteoImpItem(i=723, shift=90, var_sel=['TA'], gap_len=96), MeteoImpItem(i=718, shift=30, var_sel=['TA'], gap_len=96), MeteoImpItem(i=706, shift=-150, var_sel=['TA'], gap_len=96), MeteoImpItem(i=756, shift=90, var_sel=['TA'], gap_len=96)]
# Persist the fine-tuned gap-96 model.
torch.save(learn_TA96.model, "trained_4_feb_TA_gap_96_v1.pickle")

All variables - 12 varying

Model Av (All varying): all variables, with a varying number of variables gapped per repetition.

# Model Av ("all varying"): each repetition puts gaps in a random subset of variables.
dls_Av = imp_dataloader(haiB, hai_eraB, var_sel = gen_var_sel(list(haiB.columns)), block_len=100, gap_len=12, bs=20, control_lags=[1], n_rep=10).cpu()
model_Av = model_TA.copy()  # warm-start from the TA-only gap-12 model
save_models_Av = SaveModelsBatch(times_epoch=1) # save once per repetition
items_Av = random.choices(dls_Av.valid.items, k=4)
learn_Av = Learner(dls_Av, model_Av, KalmanLoss(only_gap=True), cbs = [Float64Callback, save_models_Av], metrics=rmse_gap)
learn_Av.fit(2, 1e-4)
epoch train_loss valid_loss rmse_gap time
0 27.495375 25.322661 0.489185 26:10
1 14.247212 13.797795 0.408078 26:31
learn_Av.recorder.plot_loss()

# Temporarily turn off the conditional computation to speed up these epochs.
with with_settings(learn_Av.model, use_conditional =False): #speed up training
    learn_Av.fit(2, 1e-4)
epoch train_loss valid_loss rmse_gap time
0 7.901138 7.892225 0.360798 23:14
1 2.295464 4.088155 0.336661 23:39
learn_Av.recorder.plot_loss()

# First checkpoint of the varying-gap model (v1).
torch.save(learn_Av.model, "trained_4_feb_All_gap_varying_12_v1.pickle")
show_results(learn_Av, items=items_Av, control_map=control_map)
[MeteoImpItem(i=1867, shift=0, var_sel=['TA'], gap_len=12), MeteoImpItem(i=2001, shift=-50, var_sel=['PA', 'VPD', 'LW_IN', 'TA', 'WS'], gap_len=12), MeteoImpItem(i=1960, shift=-40, var_sel=['TA', 'PA', 'P', 'SW_IN', 'VPD', 'WS', 'LW_IN'], gap_len=12), MeteoImpItem(i=2163, shift=30, var_sel=['TA'], gap_len=12)]
# Two more epochs, again with the conditional disabled for speed.
with with_settings(learn_Av.model, use_conditional =False): #speed up training
    learn_Av.fit(2, 1e-4)
epoch train_loss valid_loss rmse_gap time
0 3.646232 1.610648 0.321632 24:10
1 -1.735119 -0.024182 0.312380 24:34
learn_Av.recorder.plot_loss()

# Second checkpoint after the extra epochs (v2).
torch.save(learn_Av.model, "trained_4_feb_All_gap_varying_12_v2.pickle")
show_results(learn_Av, items=items_Av, control_map=control_map)
[MeteoImpItem(i=1867, shift=0, var_sel=['TA'], gap_len=12), MeteoImpItem(i=2001, shift=-50, var_sel=['PA', 'VPD', 'LW_IN', 'TA', 'WS'], gap_len=12), MeteoImpItem(i=1960, shift=-40, var_sel=['TA', 'PA', 'P', 'SW_IN', 'VPD', 'WS', 'LW_IN'], gap_len=12), MeteoImpItem(i=2163, shift=30, var_sel=['TA'], gap_len=12)]

All variables - 12 all gap

# Model Aa ("all, all-gap"): every variable has a gap simultaneously; 70-step blocks.
dls_Aa = imp_dataloader(haiB, hai_eraB, var_sel = list(haiB.columns), block_len=70, gap_len=12, bs=20, control_lags=[1], n_rep=10).cpu()
model_Aa = model_Av.copy()  # warm-start from the varying-gap model
# Permanently disable the conditional here (unlike the temporary with_settings above).
model_Aa.use_conditional = False
save_models_Aa = SaveModelsBatch(times_epoch=1) # save once per repetition
items_Aa = random.choices(dls_Aa.valid.items, k=4)
learn_Aa = Learner(dls_Aa, model_Aa, KalmanLoss(only_gap=True), cbs = [Float64Callback, save_models_Aa], metrics=rmse_gap)
learn_Aa.fit(2, 1e-4)
epoch train_loss valid_loss rmse_gap time
0 -3.860496 -2.163738 0.297889 32:06
1 -3.991504 -3.813984 0.290560 32:34
learn_Aa.recorder.plot_loss()

# Checkpoint the all-gap model.
torch.save(learn_Aa.model, "trained_4_feb_All_gap_all_12_v1.pickle")
show_results(learn_Aa, items=items_Aa, control_map=control_map)
[MeteoImpItem(i=3064, shift=7, var_sel=['P', 'VPD', 'WS', 'SW_IN', 'PA', 'TA'], gap_len=12), MeteoImpItem(i=2776, shift=7, var_sel=['VPD', 'PA', 'SW_IN', 'WS'], gap_len=12), MeteoImpItem(i=3253, shift=-21, var_sel=['VPD', 'SW_IN'], gap_len=12), MeteoImpItem(i=2716, shift=14, var_sel=['SW_IN'], gap_len=12)]

SW_IN - gap 12

# SW_IN-only gaps of 12 steps; same layout as the TA gap-12 experiment.
dls_SW_IN = imp_dataloader(haiB, hai_eraB, var_sel = 'SW_IN', block_len=50, gap_len=12, bs=20, control_lags=[1], n_rep=5).cpu()
model_SW_IN = model_Av.copy()  # warm-start from the all-varying model
save_models_SW_IN = SaveModelsBatch(times_epoch=10)
items_SW_IN = random.choices(dls_SW_IN.valid.items, k=4)
learn_SW_IN = Learner(dls_SW_IN, model_SW_IN, KalmanLoss(only_gap=True), cbs = [Float64Callback, save_models_SW_IN], metrics=rmse_gap)
learn_SW_IN.fit(3, 5e-4)
epoch train_loss valid_loss rmse_gap time
0 1.571930 2.161097 0.229693 12:20
1 1.701283 1.642652 0.216893 12:26
2 1.033681 1.752522 0.214100 12:50
learn_SW_IN.recorder.plot_loss()

# Checkpoint the SW_IN gap-12 model and display its learned matrices.
torch.save(learn_SW_IN.model, "trained_4_feb_SW_IN_gap_12_v1.pickle")
learn_SW_IN

Kalman Filter (7 obs, 14 state, 14 contr)

$A$

state x_0 x_1 x_2 x_3 x_4 x_5 x_6 x_7 x_8 x_9 x_10 x_11 x_12 x_13
x_0 1.2092 0.0099 -0.1905 0.0169 -0.0984 -0.0086 -0.0375 0.8756 0.0942 0.2282 -0.1515 -0.0565 -0.0419 -0.0731
x_1 0.0250 1.1628 -0.1911 0.1139 -0.0933 -0.0061 -0.0202 0.0043 0.8123 0.1668 -0.1492 0.0625 0.0224 0.0082
x_2 0.0058 -0.0328 1.2095 -0.1337 0.0424 0.0783 0.0067 -0.0326 -0.0048 0.6848 0.1042 -0.0030 -0.0034 0.0172
x_3 0.0506 0.0430 -0.1128 1.1909 -0.0675 -0.0662 0.0139 0.0205 0.0210 0.1381 0.6624 0.0020 0.0031 -0.0198
x_4 -0.1150 -0.0565 0.1214 -0.1329 1.1548 0.0006 0.0438 0.0248 0.0160 -0.1514 0.2205 0.7190 -0.0158 -0.0161
x_5 -0.0051 -0.0034 0.1459 -0.1069 0.0482 1.0111 0.0273 0.1421 -0.0900 -0.1165 0.1743 -0.0579 0.6217 0.0555
x_6 -0.1867 0.0374 0.0283 0.0998 0.1161 -0.1061 1.0440 0.0212 -0.1477 0.0734 -0.0844 0.0142 0.1911 1.1859
x_7 0.1577 -0.0190 -0.0549 0.0844 0.0527 0.0865 0.0427 0.9606 -0.0319 -0.1238 0.0597 0.0510 0.0823 0.0125
x_8 0.0562 0.1505 -0.0321 0.0300 -0.0300 0.0235 -0.0623 -0.0047 0.9781 -0.0897 0.0459 0.0068 0.0778 0.0129
x_9 0.0766 -0.0076 0.1531 -0.0059 -0.0445 -0.0611 0.0210 -0.0223 -0.0109 1.1227 -0.0934 0.0140 -0.0822 -0.0101
x_10 -0.0910 -0.0241 -0.0252 0.1167 0.0297 0.0981 -0.0182 -0.0112 0.0018 -0.1045 1.1444 0.0188 0.0485 0.0639
x_11 0.0049 -0.0412 0.0115 -0.0502 0.1451 -0.0527 -0.0148 0.0156 -0.0171 0.0801 -0.0582 1.0207 -0.0824 0.0098
x_12 -0.1013 0.0220 -0.0262 -0.0522 0.0147 0.1882 0.0488 -0.0134 0.0225 0.0985 -0.0930 -0.0503 0.9256 -0.0362
x_13 -0.0310 0.0349 0.0375 -0.0787 0.0667 -0.0509 0.1694 -0.0421 0.0158 -0.0043 0.0215 0.0122 -0.1242 0.8931

$Q$

state x_0 x_1 x_2 x_3 x_4 x_5 x_6 x_7 x_8 x_9 x_10 x_11 x_12 x_13
x_0 0.0535 -0.0058 0.0094 -0.0022 0.0197 -0.0093 0.0291 0.0008 -0.0093 0.0033 0.0175 -0.0064 0.0123 -0.0169
x_1 -0.0058 0.0853 0.0153 -0.0184 0.0206 -0.0001 -0.0288 0.0110 0.0101 0.0152 -0.0061 -0.0041 0.0050 0.0180
x_2 0.0094 0.0153 0.0469 0.0356 -0.0255 -0.0048 -0.0373 -0.0236 -0.0104 0.0321 0.0009 0.0050 0.0260 0.0117
x_3 -0.0022 -0.0184 0.0356 0.0919 0.0083 -0.0059 -0.0550 -0.0075 -0.0501 0.0344 0.0013 0.0492 0.0305 0.0328
x_4 0.0197 0.0206 -0.0255 0.0083 0.1291 -0.0189 0.0220 0.0355 -0.0023 -0.0047 -0.0040 0.0122 -0.0185 -0.0110
x_5 -0.0093 -0.0001 -0.0048 -0.0059 -0.0189 0.0369 -0.0015 -0.0240 0.0130 -0.0187 -0.0059 -0.0153 0.0204 -0.0223
x_6 0.0291 -0.0288 -0.0373 -0.0550 0.0220 -0.0015 0.2657 0.0163 0.0369 -0.0117 0.0199 -0.0298 -0.0415 -0.0983
x_7 0.0008 0.0110 -0.0236 -0.0075 0.0355 -0.0240 0.0163 0.0855 -0.0254 0.0138 -0.0050 0.0298 -0.0230 0.0170
x_8 -0.0093 0.0101 -0.0104 -0.0501 -0.0023 0.0130 0.0369 -0.0254 0.1099 -0.0127 -0.0089 -0.0276 -0.0180 -0.0152
x_9 0.0033 0.0152 0.0321 0.0344 -0.0047 -0.0187 -0.0117 0.0138 -0.0127 0.0855 0.0023 0.0067 0.0010 -0.0180
x_10 0.0175 -0.0061 0.0009 0.0013 -0.0040 -0.0059 0.0199 -0.0050 -0.0089 0.0023 0.0590 0.0247 -0.0185 0.0288
x_11 -0.0064 -0.0041 0.0050 0.0492 0.0122 -0.0153 -0.0298 0.0298 -0.0276 0.0067 0.0247 0.1196 -0.0086 0.0207
x_12 0.0123 0.0050 0.0260 0.0305 -0.0185 0.0204 -0.0415 -0.0230 -0.0180 0.0010 -0.0185 -0.0086 0.0850 0.0112
x_13 -0.0169 0.0180 0.0117 0.0328 -0.0110 -0.0223 -0.0983 0.0170 -0.0152 -0.0180 0.0288 0.0207 0.0112 0.3036

$b$

state offset
x_0 -0.0035
x_1 -0.0264
x_2 0.0016
x_3 -0.0055
x_4 -0.0095
x_5 -0.0025
x_6 -0.0167
x_7 0.0090
x_8 0.0001
x_9 0.0003
x_10 0.0088
x_11 -0.0035
x_12 0.0114
x_13 -0.0019

$H$

variable x_0 x_1 x_2 x_3 x_4 x_5 x_6 x_7 x_8 x_9 x_10 x_11 x_12 x_13
y_0 0.1119 -0.1229 0.5709 -0.2683 0.0774 -0.0004 0.0066 0.0550 -0.0632 0.0880 -0.0444 0.0467 -0.0558 0.0246
y_1 0.7579 0.1629 0.2327 -0.0197 -0.0384 -0.2112 -0.0065 0.1209 -0.0729 0.2284 -0.0980 0.0492 0.1636 -0.0476
y_2 0.0352 0.0115 0.5271 0.4768 -0.0234 0.0289 -0.0082 0.0849 0.0510 -0.0186 0.0404 -0.0603 -0.1070 -0.0790
y_3 0.0269 0.0600 0.1747 -0.1470 -0.0931 -0.6539 0.0019 -0.0266 0.0604 -0.0557 -0.0043 -0.0811 -0.0164 -0.0517
y_4 0.0068 -0.1011 -0.0548 -0.1172 -0.0165 0.0706 1.3534 -0.0528 0.0761 -0.1036 0.1065 -0.1210 0.0232 -0.3115
y_5 0.0833 -0.0442 -0.1637 0.0456 0.9350 0.1287 -0.0171 -0.0895 -0.0563 -0.1668 0.1400 0.1260 -0.0767 0.0628
y_6 -0.1897 -0.8226 -0.0693 0.0926 0.0038 -0.1236 0.0132 -0.0346 -0.1286 0.1014 -0.0264 0.0526 -0.0744 0.0379

$R$

variable y_0 y_1 y_2 y_3 y_4 y_5 y_6
y_0 0.0014 -0.0038 0.0011 0.0035 0.0017 0.0021 -0.0001
y_1 -0.0038 0.0176 -0.0059 -0.0111 0.0069 0.0018 0.0015
y_2 0.0011 -0.0059 0.0058 0.0040 0.0065 -0.0014 -0.0039
y_3 0.0035 -0.0111 0.0040 0.0107 0.0027 0.0077 -0.0039
y_4 0.0017 0.0069 0.0065 0.0027 0.0722 -0.0017 -0.0053
y_5 0.0021 0.0018 -0.0014 0.0077 -0.0017 0.0376 -0.0054
y_6 -0.0001 0.0015 -0.0039 -0.0039 -0.0053 -0.0054 0.0136

$d$

variable offset
y_0 -0.0007
y_1 -0.0063
y_2 -0.0204
y_3 0.0145
y_4 -0.0090
y_5 -0.0468
y_6 -0.0133

$B$

state c_0 c_1 c_2 c_3 c_4 c_5 c_6 c_7 c_8 c_9 c_10 c_11 c_12 c_13
x_0 0.1186 -0.8485 -0.0630 0.1522 0.0082 -0.0564 -0.0824 0.1909 0.8190 -0.0354 0.1066 -0.0523 0.0139 -0.0462
x_1 0.2492 0.0849 -0.0074 0.0056 -0.0049 -0.0478 0.8928 0.1398 0.0167 -0.0199 -0.0437 0.0060 0.0587 -0.7312
x_2 -0.8873 -0.0521 -0.4452 0.0263 0.0363 0.1346 0.1307 0.6552 0.0144 0.4837 0.1016 0.0027 -0.0666 -0.1407
x_3 0.5261 -0.0119 -0.8469 -0.0277 -0.0336 -0.1063 -0.0656 -0.2830 -0.1705 0.8257 -0.0865 0.0056 0.0958 0.0383
x_4 -0.1738 0.0272 0.0501 0.1181 -0.0083 -0.9154 0.0459 -0.1750 0.1252 0.0355 -0.1135 0.0329 0.8490 -0.0108
x_5 -0.0502 -0.1978 0.0494 0.9221 -0.0033 0.2204 0.0062 -0.1387 -0.0034 -0.0207 -0.9759 0.0176 -0.1051 -0.0576
x_6 -0.0261 0.0876 -0.0362 -0.0741 -0.6339 0.0092 0.0529 -0.0338 0.0857 -0.0185 -0.0145 0.6687 -0.0498 0.0243
x_7 0.0388 -0.1206 -0.0035 0.1071 -0.0304 -0.0273 -0.0187 0.0207 -0.1097 0.0004 0.1018 -0.0148 -0.0261 -0.0148
x_8 0.0774 -0.0308 -0.0022 0.0180 0.0222 0.0056 0.0632 0.0798 -0.0283 -0.0045 0.0180 -0.0022 0.0181 0.0522
x_9 -0.0570 -0.0036 -0.0621 -0.0331 0.0118 0.0193 0.0259 -0.0540 -0.0366 -0.0568 -0.0314 0.0155 0.0005 0.0092
x_10 0.0497 0.0343 -0.0569 0.0741 0.0015 -0.0078 -0.0098 0.0436 0.0464 -0.0525 0.0730 -0.0180 -0.0003 0.0007
x_11 -0.0501 0.0310 0.0116 -0.0328 0.0160 -0.0563 -0.0090 -0.0572 0.0090 0.0115 -0.0320 0.0180 -0.0631 -0.0271
x_12 -0.0648 0.0881 0.0226 0.0939 -0.0059 -0.0011 0.0477 -0.0567 0.0641 0.0235 0.0896 -0.0028 -0.0068 0.0513
x_13 -0.0057 0.0274 -0.0023 -0.0298 -0.1657 0.0082 0.0236 -0.0124 0.0238 0.0026 -0.0266 0.0452 -0.0011 0.0150

$m_0$

state mean
x_0 0.0011
x_1 -0.0008
x_2 -0.0004
x_3 -0.0001
x_4 -0.0002
x_5 0.0003
x_6 -0.0000
x_7 -0.0008
x_8 0.0006
x_9 0.0003
x_10 -0.0005
x_11 -0.0003
x_12 -0.0012
x_13 -0.0000

$P_0$

state x_0 x_1 x_2 x_3 x_4 x_5 x_6 x_7 x_8 x_9 x_10 x_11 x_12 x_13
x_0 2.8754 -0.0007 0.0011 0.0001 -0.0005 -0.0010 0.0001 -0.0001 0.0010 -0.0006 0.0007 -0.0014 -0.0013 0.0003
x_1 -0.0007 2.8798 0.0000 0.0004 0.0015 -0.0006 -0.0000 0.0004 -0.0009 0.0003 -0.0001 0.0001 0.0014 -0.0003
x_2 0.0011 0.0000 2.8779 -0.0005 -0.0004 0.0008 -0.0002 0.0004 -0.0002 -0.0003 0.0003 0.0003 -0.0002 -0.0001
x_3 0.0001 0.0004 -0.0005 2.8785 0.0010 -0.0002 -0.0002 0.0008 -0.0004 -0.0001 -0.0009 -0.0003 0.0007 0.0001
x_4 -0.0005 0.0015 -0.0004 0.0010 2.8854 -0.0017 -0.0008 -0.0026 0.0005 0.0011 -0.0001 0.0004 0.0002 -0.0005
x_5 -0.0010 -0.0006 0.0008 -0.0002 -0.0017 2.8786 0.0005 0.0016 0.0001 -0.0009 0.0002 -0.0014 -0.0003 0.0000
x_6 0.0001 -0.0000 -0.0002 -0.0002 -0.0008 0.0005 2.8783 0.0010 -0.0002 -0.0002 -0.0002 -0.0002 0.0005 0.0001
x_7 -0.0001 0.0004 0.0004 0.0008 -0.0026 0.0016 0.0010 2.8781 -0.0007 0.0004 -0.0001 0.0009 0.0007 -0.0009
x_8 0.0010 -0.0009 -0.0002 -0.0004 0.0005 0.0001 -0.0002 -0.0007 2.8794 -0.0002 0.0001 0.0000 -0.0015 0.0003
x_9 -0.0006 0.0003 -0.0003 -0.0001 0.0011 -0.0009 -0.0002 0.0004 -0.0002 2.8781 -0.0001 -0.0003 0.0005 0.0003
x_10 0.0007 -0.0001 0.0003 -0.0009 -0.0001 0.0002 -0.0002 -0.0001 0.0001 -0.0001 2.8795 0.0001 -0.0004 -0.0000
x_11 -0.0014 0.0001 0.0003 -0.0003 0.0004 -0.0014 -0.0002 0.0009 0.0000 -0.0003 0.0001 2.8775 0.0004 0.0004
x_12 -0.0013 0.0014 -0.0002 0.0007 0.0002 -0.0003 0.0005 0.0007 -0.0015 0.0005 -0.0004 0.0004 2.8811 -0.0007
x_13 0.0003 -0.0003 -0.0001 0.0001 -0.0005 0.0000 0.0001 -0.0009 0.0003 0.0003 -0.0000 0.0004 -0.0007 2.8780
# Imputation plots for the SW_IN gap-12 model on its fixed validation items.
show_results(learn_SW_IN, items = items_SW_IN, control_map = control_map, hide_no_gap=True)
[MeteoImpItem(i=3927, shift=-15, var_sel=['SW_IN'], gap_len=12), MeteoImpItem(i=4045, shift=-5, var_sel=['SW_IN'], gap_len=12), MeteoImpItem(i=4390, shift=-25, var_sel=['SW_IN'], gap_len=12), MeteoImpItem(i=3968, shift=-25, var_sel=['SW_IN'], gap_len=12)]

SW_IN - 96

After the first training, fine-tune the model for longer gaps.

# Fine-tune the SW_IN model on 96-step gaps inside 300-step blocks.
dls_SW_IN96 = imp_dataloader(haiB, hai_eraB, var_sel = 'SW_IN', block_len=300, gap_len=96, bs=20, control_lags=[1], n_rep=5).cpu()
model_SW_IN96 = model_SW_IN.copy()  # warm-start from the gap-12 weights
save_models_SW_IN96 = SaveModelsBatch(times_epoch=5)
items_SW_IN96 = random.choices(dls_SW_IN96.valid.items, k=4)
learn_SW_IN96 = Learner(dls_SW_IN96, model_SW_IN96, KalmanLoss(only_gap=True), cbs = [Float64Callback, save_models_SW_IN96], metrics=rmse_gap)
# Baseline plot before fine-tuning.
show_results(learn_SW_IN96, items = items_SW_IN96, control_map = control_map, hide_no_gap=True)
[MeteoImpItem(i=660, shift=30, var_sel=['SW_IN'], gap_len=96), MeteoImpItem(i=684, shift=30, var_sel=['SW_IN'], gap_len=96), MeteoImpItem(i=684, shift=90, var_sel=['SW_IN'], gap_len=96), MeteoImpItem(i=632, shift=-90, var_sel=['SW_IN'], gap_len=96)]
# Fine-tune with a reduced learning rate.
learn_SW_IN96.fit(2, 1e-4)
epoch train_loss valid_loss rmse_gap time
0 23.111818 21.649970 0.277776 22:20
1 23.287213 21.537204 0.277468 22:06
learn_SW_IN96.recorder.plot_loss()

# Same items as the pre-training plot, to visualize the improvement.
show_results(learn_SW_IN96, items = items_SW_IN96, control_map = control_map, hide_no_gap=True)
[MeteoImpItem(i=660, shift=30, var_sel=['SW_IN'], gap_len=96), MeteoImpItem(i=684, shift=30, var_sel=['SW_IN'], gap_len=96), MeteoImpItem(i=684, shift=90, var_sel=['SW_IN'], gap_len=96), MeteoImpItem(i=632, shift=-90, var_sel=['SW_IN'], gap_len=96)]
# Checkpoint the fine-tuned SW_IN gap-96 model.
torch.save(learn_SW_IN96.model, "trained_4_feb_SW_IN_gap_96_v1.pickle")

Control

No Control - TA 12

# Ablation: TA gap-12 without the ERA control input (use_control=False).
dls_TA_nc = imp_dataloader(haiB, hai_eraB, var_sel = 'TA', block_len=50, gap_len=12, bs=20, control_lags=[1], n_rep=1).cpu()
model_TA_nc = model_TA96.copy()  # start from the fine-tuned TA gap-96 weights
model_TA_nc.use_control = False  # disable the control input for this ablation
save_models_TA_nc = SaveModelsBatch(times_epoch=2)
items_TA_nc = random.choices(dls_TA_nc.valid.items, k=4)
learn_TA_nc = Learner(dls_TA_nc, model_TA_nc, KalmanLoss(only_gap=True), cbs = [Float64Callback, save_models_TA_nc], metrics=rmse_gap)
learn_TA_nc.fit(1, 5e-4)
epoch train_loss valid_loss rmse_gap time
0 -18.464700 -18.834046 0.040957 02:33
learn_TA_nc.recorder.plot_loss()

# Checkpoint the no-control TA gap-12 model.
torch.save(learn_TA_nc.model, "trained_4_feb_TA_gap_12_no_control_v1.pickle")
show_results(learn_TA_nc, items = items_TA_nc, control_map = control_map, hide_no_gap=True)
[MeteoImpItem(i=3919, shift=15, var_sel=['TA'], gap_len=12), MeteoImpItem(i=4068, shift=15, var_sel=['TA'], gap_len=12), MeteoImpItem(i=4114, shift=-15, var_sel=['TA'], gap_len=12), MeteoImpItem(i=4180, shift=-5, var_sel=['TA'], gap_len=12)]

No Control - TA 96

# No-control ablation on 96-step TA gaps.
dls_TA96_nc = imp_dataloader(haiB, hai_eraB, var_sel = 'TA', block_len=300, gap_len=96, bs=20, control_lags=[1], n_rep=5).cpu()
model_TA96_nc = model_TA_nc.copy()  # continue from the no-control gap-12 model
model_TA96_nc.use_control = False  # already False on the source model; kept for explicitness
/home/simone/anaconda3/envs/data-science/lib/python3.10/site-packages/fastai/callback/core.py:69: UserWarning: You are shadowing an attribute (__class__) that exists in the learner. Use `self.learn.__class__` to avoid this
  warn(f"You are shadowing an attribute ({name}) that exists in the learner. Use `self.learn.{name}` to avoid this")
save_models_TA96_nc = SaveModelsBatch(times_epoch=2)
items_TA96_nc = random.choices(dls_TA96_nc.valid.items, k=4)
learn_TA96_nc = Learner(dls_TA96_nc, model_TA96_nc, KalmanLoss(only_gap=True), cbs = [Float64Callback, save_models_TA96_nc], metrics=rmse_gap)
# Baseline plot before fine-tuning.
show_results(learn_TA96_nc, items = items_TA96_nc, control_map = control_map, hide_no_gap=True)
[MeteoImpItem(i=757, shift=-90, var_sel=['TA'], gap_len=96), MeteoImpItem(i=711, shift=-90, var_sel=['TA'], gap_len=96), MeteoImpItem(i=743, shift=-150, var_sel=['TA'], gap_len=96), MeteoImpItem(i=625, shift=-30, var_sel=['TA'], gap_len=96)]
# Two epochs at 5e-4 on the long-gap, no-control setting.
learn_TA96_nc.fit(2, 5e-4)
epoch train_loss valid_loss rmse_gap time
0 -16.875112 -38.792134 0.158970 24:20
1 -36.028650 -42.915542 0.153475 22:35
learn_TA96_nc.recorder.plot_loss()

# Checkpoint the no-control TA gap-96 model.
torch.save(learn_TA96_nc.model, "trained_4_feb_TA_gap_96_no_control_v1.pickle")
show_results(learn_TA96_nc, items = items_TA96_nc, control_map = control_map, hide_no_gap=True)
[MeteoImpItem(i=757, shift=-90, var_sel=['TA'], gap_len=96), MeteoImpItem(i=711, shift=-90, var_sel=['TA'], gap_len=96), MeteoImpItem(i=743, shift=-150, var_sel=['TA'], gap_len=96), MeteoImpItem(i=625, shift=-30, var_sel=['TA'], gap_len=96)]

No Control - All variables - 12 all gap

# No-control ablation of the all-variables / gap-12 model.
# NOTE(review): the "96" in these names is misleading — gap_len here is 12, not 96.
# Names are kept unchanged because the next section references them.
dls_Aa96_nc = imp_dataloader(haiB, hai_eraB, var_sel = list(haiB.columns), block_len=70, gap_len=12, bs=20, control_lags=[1], n_rep=2).cpu()
# Bug fix: the original read `model_Aa96.copy()`, but `model_Aa96` is never defined
# anywhere in this notebook (NameError). The trained all-variables gap-12 model is
# `model_Aa`, so warm-start from that.
model_Aa96_nc = model_Aa.copy()
model_Aa96_nc.use_control = False  # disable the ERA control input for this ablation
save_models_Aa96_nc= SaveModelsBatch(times_epoch=1) # save once per repetition
items_Aa96_nc  = random.choices(dls_Aa96_nc.valid.items, k=4)
learn_Aa96_nc = Learner(dls_Aa96_nc, model_Aa96_nc, KalmanLoss(only_gap=True), cbs = [Float64Callback, save_models_Aa96_nc], metrics=rmse_gap)
# Baseline plot before any no-control training.
show_results(learn_Aa96_nc, items=items_Aa96_nc, control_map=control_map)
[MeteoImpItem(i=3064, shift=7, var_sel=['P', 'VPD', 'WS', 'SW_IN', 'PA', 'TA'], gap_len=12), MeteoImpItem(i=2776, shift=7, var_sel=['VPD', 'PA', 'SW_IN', 'WS'], gap_len=12), MeteoImpItem(i=3253, shift=-21, var_sel=['VPD', 'SW_IN'], gap_len=12), MeteoImpItem(i=2716, shift=14, var_sel=['SW_IN'], gap_len=12)]
# First of several single-epoch fits (run one at a time to monitor progress).
learn_Aa96_nc.fit(1, 1e-4)
epoch train_loss valid_loss rmse_gap time
0 38.744036 30.468859 0.436350 03:39
learn_Aa96_nc.recorder.plot_loss()

# Repeated single-epoch fits; loss decreases steadily (~30 -> ~-5.5 valid_loss).
learn_Aa96_nc.fit(1, 1e-4)
epoch train_loss valid_loss rmse_gap time
0 12.341979 12.002963 0.392356 03:51
learn_Aa96_nc.fit(1, 1e-4)
epoch train_loss valid_loss rmse_gap time
0 5.277773 4.664958 0.367391 37:31
learn_Aa96_nc.fit(1, 1e-4)
epoch train_loss valid_loss rmse_gap time
0 1.912588 0.122255 0.352851 04:10
learn_Aa96_nc.fit(1, 1e-4)
epoch train_loss valid_loss rmse_gap time
0 -2.542222 -3.259210 0.344657 03:51
learn_Aa96_nc.fit(1, 1e-4)
epoch train_loss valid_loss rmse_gap time
0 -5.897630 -5.467389 0.339895 04:01
learn_Aa96_nc.recorder.plot_loss()

# Checkpoint the no-control all-variables gap-12 model.
torch.save(learn_Aa96_nc.model, "trained_4_feb_All_gap_all_12_no_control_v1.pickle")
show_results(learn_Aa96_nc, items=items_Aa96_nc, control_map=control_map)
[MeteoImpItem(i=3077, shift=-35, var_sel=['TA', 'SW_IN', 'VPD', 'PA', 'P', 'WS', 'LW_IN'], gap_len=12), MeteoImpItem(i=3195, shift=-35, var_sel=['TA', 'SW_IN', 'VPD', 'PA', 'P', 'WS', 'LW_IN'], gap_len=12), MeteoImpItem(i=3230, shift=0, var_sel=['TA', 'SW_IN', 'VPD', 'PA', 'P', 'WS', 'LW_IN'], gap_len=12), MeteoImpItem(i=2910, shift=0, var_sel=['TA', 'SW_IN', 'VPD', 'PA', 'P', 'WS', 'LW_IN'], gap_len=12)]

No Control - All variables - 24 all gap

# No-control all-variables model fine-tuned on 24-step gaps in 150-step blocks.
dls_Aa24_nc = imp_dataloader(haiB, hai_eraB, var_sel = list(haiB.columns), block_len=150, gap_len=24, bs=20, control_lags=[1], n_rep=2).cpu()
# Bug fix: the original read `model_Aa_nc.copy()`, but `model_Aa_nc` is never defined
# in this notebook (NameError). The no-control all-variables gap-12 model from the
# previous section is `model_Aa96_nc`, so continue from that.
model_Aa24_nc = model_Aa96_nc.copy()
model_Aa24_nc.use_control = False  # already False on the source model; kept for explicitness
save_models_Aa24_nc= SaveModelsBatch(times_epoch=1) # save once per repetition
items_Aa24_nc  = random.choices(dls_Aa24_nc.valid.items, k=4)
learn_Aa24_nc = Learner(dls_Aa24_nc, model_Aa24_nc, KalmanLoss(only_gap=True), cbs = [Float64Callback, save_models_Aa24_nc], metrics=rmse_gap)
# Baseline plot before fine-tuning on 24-step gaps.
show_results(learn_Aa24_nc, items=items_Aa24_nc, control_map=control_map)
[MeteoImpItem(i=1439, shift=0, var_sel=['TA', 'SW_IN', 'VPD', 'PA', 'P', 'WS', 'LW_IN'], gap_len=24), MeteoImpItem(i=1222, shift=-75, var_sel=['TA', 'SW_IN', 'VPD', 'PA', 'P', 'WS', 'LW_IN'], gap_len=24), MeteoImpItem(i=1376, shift=-75, var_sel=['TA', 'SW_IN', 'VPD', 'PA', 'P', 'WS', 'LW_IN'], gap_len=24), MeteoImpItem(i=1364, shift=0, var_sel=['TA', 'SW_IN', 'VPD', 'PA', 'P', 'WS', 'LW_IN'], gap_len=24)]
# Five epochs at 3e-4 on the 24-step-gap, no-control setting.
learn_Aa24_nc.fit(5, 3e-4)
epoch train_loss valid_loss rmse_gap time
0 41.673347 42.895707 0.457301 04:47
1 42.247850 40.339583 0.451268 04:37
2 34.196943 38.366525 0.445817 04:47
3 32.270157 36.991478 0.443363 04:42
4 33.566557 35.243634 0.436804 04:54
learn_Aa24_nc.recorder.plot_loss()

# Checkpoint the no-control all-variables gap-24 model.
torch.save(learn_Aa24_nc.model, "trained_4_feb_All_gap_all_24_no_control_v1.pickle")
show_results(learn_Aa24_nc, items=items_Aa24_nc, control_map=control_map)
[MeteoImpItem(i=1439, shift=0, var_sel=['TA', 'SW_IN', 'VPD', 'PA', 'P', 'WS', 'LW_IN'], gap_len=24), MeteoImpItem(i=1222, shift=-75, var_sel=['TA', 'SW_IN', 'VPD', 'PA', 'P', 'WS', 'LW_IN'], gap_len=24), MeteoImpItem(i=1376, shift=-75, var_sel=['TA', 'SW_IN', 'VPD', 'PA', 'P', 'WS', 'LW_IN'], gap_len=24), MeteoImpItem(i=1364, shift=0, var_sel=['TA', 'SW_IN', 'VPD', 'PA', 'P', 'WS', 'LW_IN'], gap_len=24)]

No Control - All variables - 24 varying

# No-control ablation with *varying* variable subsets (gen_var_sel), 24-step gaps.
dls_Av24_nc = imp_dataloader(haiB, hai_eraB, var_sel = gen_var_sel(list(haiB.columns)), block_len=150, gap_len=24, bs=20, control_lags=[1], n_rep=2).cpu()
# Continue from the all-variables 24-gap no-control model trained above.
model_Av24_nc = model_Aa24_nc.copy() 
# Disable the control input for this ablation.
model_Av24_nc.use_control = False
save_models_Av24_nc= SaveModelsBatch(times_epoch=1) # save once per repetition
# Fix 4 random validation items for before/after comparison plots.
items_Av24_nc  = random.choices(dls_Av24_nc.valid.items, k=4)
learn_Av24_nc = Learner(dls_Av24_nc, model_Av24_nc, KalmanLoss(only_gap=True), cbs = [Float64Callback, save_models_Av24_nc], metrics=rmse_gap)
# Baseline predictions before fine-tuning.
show_results(learn_Av24_nc, items=items_Av24_nc, control_map=control_map)
[MeteoImpItem(i=1438, shift=0, var_sel=['SW_IN', 'TA', 'PA', 'LW_IN', 'VPD', 'WS', 'P'], gap_len=24), MeteoImpItem(i=1275, shift=0, var_sel=['WS', 'SW_IN', 'TA', 'LW_IN'], gap_len=24), MeteoImpItem(i=1316, shift=-75, var_sel=['VPD', 'WS', 'TA', 'P', 'LW_IN', 'PA', 'SW_IN'], gap_len=24), MeteoImpItem(i=1273, shift=-75, var_sel=['P', 'TA', 'SW_IN', 'LW_IN', 'WS'], gap_len=24)]
# Short fine-tune (2 epochs) on the varying-variable-subset data.
learn_Av24_nc.fit(2, 3e-4)
epoch train_loss valid_loss rmse_gap time
0 18.450633 16.309334 0.376543 06:58
1 15.670756 16.164595 0.374336 06:24
# Plot the train/valid loss curves recorded during the fit above.
learn_Av24_nc.recorder.plot_loss()

# Persist the fine-tuned model, then re-plot the same validation items for comparison.
torch.save(learn_Av24_nc.model, "trained_4_feb_All_gap_varying_24_no_control_v1.pickle")
show_results(learn_Av24_nc, items=items_Av24_nc, control_map=control_map)

No Control - All variables - 36 all gap

# No-control ablation: all variables, longer 36-step gaps.
dls_Aa36_nc = imp_dataloader(haiB, hai_eraB, var_sel = list(haiB.columns), block_len=150, gap_len=36, bs=20, control_lags=[1], n_rep=2).cpu()
# Continue from the 24-gap no-control model.
model_Aa36_nc = model_Aa24_nc.copy() 
# Disable the control input for this ablation.
model_Aa36_nc.use_control = False
save_models_Aa36_nc= SaveModelsBatch(times_epoch=1) # save once per repetition
# Fix 4 random validation items for before/after comparison plots.
items_Aa36_nc  = random.choices(dls_Aa36_nc.valid.items, k=4)
learn_Aa36_nc = Learner(dls_Aa36_nc, model_Aa36_nc, KalmanLoss(only_gap=True), cbs = [Float64Callback, save_models_Aa36_nc], metrics=rmse_gap)
# Baseline predictions before fine-tuning.
show_results(learn_Aa36_nc, items=items_Aa36_nc, control_map=control_map)
[MeteoImpItem(i=1384, shift=0, var_sel=['TA', 'SW_IN', 'VPD', 'PA', 'P', 'WS', 'LW_IN'], gap_len=36), MeteoImpItem(i=1362, shift=-75, var_sel=['TA', 'SW_IN', 'VPD', 'PA', 'P', 'WS', 'LW_IN'], gap_len=36), MeteoImpItem(i=1410, shift=0, var_sel=['TA', 'SW_IN', 'VPD', 'PA', 'P', 'WS', 'LW_IN'], gap_len=36), MeteoImpItem(i=1505, shift=-75, var_sel=['TA', 'SW_IN', 'VPD', 'PA', 'P', 'WS', 'LW_IN'], gap_len=36)]
# NOTE(review): this run crashed with a non-positive-definite covariance
# (see the ValueError traceback below) — consider a lower learning rate or
# re-initializing from a stable checkpoint before retrying.
learn_Aa36_nc.fit(2, 3e-4)
0.00% [0/2 00:00<?]
epoch train_loss valid_loss rmse_gap time

0.00% [0/121 00:00<?]
ValueError: Expected parameter covariance_matrix (Tensor of shape (7, 7)) of distribution MultivariateNormal(loc: torch.Size([7]), covariance_matrix: torch.Size([7, 7])) to satisfy the constraint PositiveDefinite(), but found invalid values:
tensor([[ 1.6642e-02,  4.3281e-03,  9.5652e-03,  2.6394e-03,  2.6164e-02,
          1.5804e-02,  3.9479e-02],
        [ 4.3281e-03,  5.1357e-02,  3.1779e-03,  4.4854e-03, -1.5141e-02,
          3.6567e-03, -3.5843e-02],
        [ 9.5652e-03,  3.1779e-03,  1.6632e-02, -4.0737e-04,  1.4566e-02,
          1.5618e-02,  1.2989e-02],
        [ 2.6394e-03,  4.4854e-03, -4.0737e-04,  4.9380e-03, -9.9333e-03,
         -1.2459e-03, -7.2656e-03],
        [ 2.6164e-02, -1.5141e-02,  1.4566e-02, -9.9333e-03,  7.9823e-01,
          2.5349e-02,  5.7989e-02],
        [ 1.5804e-02,  3.6567e-03,  1.5618e-02, -1.2459e-03,  2.5349e-02,
          1.0991e-01,  6.7019e-02],
        [ 3.9479e-02, -3.5843e-02,  1.2989e-02, -7.2656e-03,  5.7989e-02,
          6.7019e-02,  1.3826e-01]], dtype=torch.float64,
       grad_fn=<ExpandBackward0>)
# NOTE(review): the fit above failed before completing an epoch, so the plot is
# empty/partial and the saved model is the pre-/partially-trained state — verify
# before using this pickle downstream.
learn_Aa36_nc.recorder.plot_loss()
torch.save(learn_Aa36_nc.model, "trained_4_feb_All_gap_all_36_no_control_v1.pickle")
show_results(learn_Aa36_nc, items=items_Aa36_nc, control_map=control_map)

No Control - All variables - 48 all gap

# No-control ablation: all variables, 48-step gaps in longer 350-step blocks.
dls_Aa48_nc = imp_dataloader(haiB, hai_eraB, var_sel = list(haiB.columns), block_len=350, gap_len=48, bs=20, control_lags=[1], n_rep=2).cpu()
# NOTE(review): copies model_Aa_nc (not model_Aa24_nc as the 36-gap run does) —
# confirm which checkpoint was intended as the starting point.
model_Aa48_nc = model_Aa_nc.copy() 
# Disable the control input for this ablation.
model_Aa48_nc.use_control = False
save_models_Aa48_nc= SaveModelsBatch(times_epoch=1) # save once per repetition
# Fix 4 random validation items for before/after comparison plots.
items_Aa48_nc  = random.choices(dls_Aa48_nc.valid.items, k=4)
learn_Aa48_nc = Learner(dls_Aa48_nc, model_Aa48_nc, KalmanLoss(only_gap=True), cbs = [Float64Callback, save_models_Aa48_nc], metrics=rmse_gap)
# Baseline predictions before fine-tuning.
show_results(learn_Aa48_nc, items=items_Aa48_nc, control_map=control_map)
[MeteoImpItem(i=640, shift=0, var_sel=['TA', 'SW_IN', 'VPD', 'PA', 'P', 'WS', 'LW_IN'], gap_len=48), MeteoImpItem(i=534, shift=0, var_sel=['TA', 'SW_IN', 'VPD', 'PA', 'P', 'WS', 'LW_IN'], gap_len=48), MeteoImpItem(i=621, shift=-175, var_sel=['TA', 'SW_IN', 'VPD', 'PA', 'P', 'WS', 'LW_IN'], gap_len=48), MeteoImpItem(i=559, shift=-175, var_sel=['TA', 'SW_IN', 'VPD', 'PA', 'P', 'WS', 'LW_IN'], gap_len=48)]
# NOTE(review): this run also crashed with a non-positive-definite covariance
# whose entries have exploded to ~1e31 (see traceback below) — the optimization
# diverged; a lower learning rate or gradient clipping may be needed.
learn_Aa48_nc.fit(5, 3e-4)
0.00% [0/5 00:00<?]
epoch train_loss valid_loss rmse_gap time

0.00% [0/51 00:00<?]
ValueError: Expected parameter covariance_matrix (Tensor of shape (7, 7)) of distribution MultivariateNormal(loc: torch.Size([7]), covariance_matrix: torch.Size([7, 7])) to satisfy the constraint PositiveDefinite(), but found invalid values:
tensor([[-2.1252e+31, -7.2463e+31, -5.0732e+29, -7.1944e+29,  1.8079e+31,
          7.5784e+30, -4.2668e+31],
        [-7.2463e+31, -2.4719e+32, -1.8432e+30, -2.4103e+30,  6.1574e+31,
          2.6057e+31, -1.4537e+32],
        [-5.0732e+29, -1.8432e+30, -1.2298e+29,  2.4634e+28,  3.6487e+29,
          3.9303e+29, -9.0798e+29],
        [-7.1944e+29, -2.4103e+30,  2.4634e+28, -4.0120e+28,  6.3716e+29,
          1.7656e+29, -1.4861e+30],
        [ 1.8079e+31,  6.1574e+31,  3.6487e+29,  6.3716e+29, -1.5419e+31,
         -6.3192e+30,  3.6363e+31],
        [ 7.5784e+30,  2.6057e+31,  3.9303e+29,  1.7656e+29, -6.3192e+30,
         -3.1082e+30,  1.5004e+31],
        [-4.2668e+31, -1.4537e+32, -9.0798e+29, -1.4861e+30,  3.6363e+31,
          1.5004e+31, -8.5775e+31]], dtype=torch.float64,
       grad_fn=<ExpandBackward0>)
# Plot whatever losses were recorded before the crash above.
learn_Aa48_nc.recorder.plot_loss()

# NOTE(review): saved after a failed fit — the pickle holds the diverged/partial
# model state; verify before using it downstream.
torch.save(learn_Aa48_nc.model, "trained_4_feb_All_gap_all_48_no_control_v1.pickle")
show_results(learn_Aa48_nc, items=items_Aa48_nc, control_map=control_map)
[MeteoImpItem(i=3077, shift=-35, var_sel=['TA', 'SW_IN', 'VPD', 'PA', 'P', 'WS', 'LW_IN'], gap_len=12), MeteoImpItem(i=3195, shift=-35, var_sel=['TA', 'SW_IN', 'VPD', 'PA', 'P', 'WS', 'LW_IN'], gap_len=12), MeteoImpItem(i=3230, shift=0, var_sel=['TA', 'SW_IN', 'VPD', 'PA', 'P', 'WS', 'LW_IN'], gap_len=12), MeteoImpItem(i=2910, shift=0, var_sel=['TA', 'SW_IN', 'VPD', 'PA', 'P', 'WS', 'LW_IN'], gap_len=12)]

LW_IN - gap 12

# Single-variable experiment: LW_IN only, 12-step gaps, 50-step blocks, 5 repetitions.
dls_LW_IN = imp_dataloader(haiB, hai_eraB, var_sel = 'LW_IN', block_len=50, gap_len=12, bs=20, control_lags=[1], n_rep=5).cpu()
# Start from a copy of the all-variables model trained earlier (model_Av).
model_LW_IN = model_Av.copy()
save_models_LW_IN = SaveModelsBatch(times_epoch=10)
# Fix 4 random validation items for the result plots.
items_LW_IN = random.choices(dls_LW_IN.valid.items, k=4)
learn_LW_IN = Learner(dls_LW_IN, model_LW_IN, KalmanLoss(only_gap=True), cbs = [Float64Callback, save_models_LW_IN], metrics=rmse_gap)
# Train for one epoch.
learn_LW_IN.fit(1, 1e-3)
epoch train_loss valid_loss rmse_gap time
0 3.196418 3.997740 0.280681 11:39
# Plot the train/valid loss curves recorded during the fit above.
learn_LW_IN.recorder.plot_loss()

# Persist the trained model and plot results (non-gap variables hidden).
torch.save(learn_LW_IN.model, "trained_4_feb_LW_IN_gap_12_v1.pickle")
show_results(learn_LW_IN, items = items_LW_IN, control_map = control_map, hide_no_gap=True)
[MeteoImpItem(i=4462, shift=15, var_sel=['LW_IN'], gap_len=12), MeteoImpItem(i=4415, shift=5, var_sel=['LW_IN'], gap_len=12), MeteoImpItem(i=4322, shift=15, var_sel=['LW_IN'], gap_len=12), MeteoImpItem(i=4212, shift=-5, var_sel=['LW_IN'], gap_len=12)]

LW_IN - 96

After the first training, fine-tune the model for longer gaps.

# Fine-tune the 12-gap LW_IN model on much longer 96-step gaps (300-step blocks).
dls_LW_IN96 = imp_dataloader(haiB, hai_eraB, var_sel = 'LW_IN', block_len=300, gap_len=96, bs=20, control_lags=[1], n_rep=5).cpu()
model_LW_IN96 = model_LW_IN.copy()
save_models_LW_IN96 = SaveModelsBatch(times_epoch=5)
# Fix 4 random validation items for before/after comparison plots.
items_LW_IN96 = random.choices(dls_LW_IN96.valid.items, k=4)
learn_LW_IN96 = Learner(dls_LW_IN96, model_LW_IN96, KalmanLoss(only_gap=True), cbs = [Float64Callback, save_models_LW_IN96], metrics=rmse_gap)
# Baseline predictions before fine-tuning on the long gaps.
show_results(learn_LW_IN96, items = items_LW_IN96, control_map = control_map, hide_no_gap=True)
[MeteoImpItem(i=634, shift=-90, var_sel=['LW_IN'], gap_len=96), MeteoImpItem(i=735, shift=-30, var_sel=['LW_IN'], gap_len=96), MeteoImpItem(i=651, shift=-150, var_sel=['LW_IN'], gap_len=96), MeteoImpItem(i=627, shift=-90, var_sel=['LW_IN'], gap_len=96)]
# One epoch of fine-tuning at a reduced learning rate.
learn_LW_IN96.fit(1, 1e-4)
epoch train_loss valid_loss rmse_gap time
0 51.693777 57.928376 0.419843 21:42
# Plot the train/valid loss curves recorded during the fit above.
learn_LW_IN96.recorder.plot_loss()

# Re-plot the same validation items after fine-tuning for comparison.
show_results(learn_LW_IN96, items = items_LW_IN96, control_map = control_map, hide_no_gap=True)
[MeteoImpItem(i=634, shift=-90, var_sel=['LW_IN'], gap_len=96), MeteoImpItem(i=735, shift=-30, var_sel=['LW_IN'], gap_len=96), MeteoImpItem(i=651, shift=-150, var_sel=['LW_IN'], gap_len=96), MeteoImpItem(i=627, shift=-90, var_sel=['LW_IN'], gap_len=96)]
# Persist the fine-tuned 96-gap model.
torch.save(learn_LW_IN96.model, "trained_4_feb_LW_IN_gap_96_v1.pickle")

Conditional

# Conditional experiment: varying variable subsets, 10-step gaps, 100-step blocks.
# haiB is passed as both the data and the "era" argument, so no external
# reanalysis product is used as control here.
dls3 = imp_dataloader(haiB, haiB, var_sel = gen_var_sel(list(haiB.columns)), block_len=100, gap_len=10, bs=20, control_lags=[1], n_rep=10).cpu()
# NOTE(review): df_pca=None here, unlike the TA run which passes df_pca=haiB —
# confirm that skipping the PCA-based initialization is intentional.
model3 = KalmanFilterSR.init_local_slope_pca(len(haiB.columns),len(haiB.columns), df_pca = None, pred_only_gap=True, use_conditional=True)
save_models3 = SaveModelsBatch(times_epoch=1) # save once per repetition
# Fix 4 random validation items for the result plots.
items = random.choices(dls3.valid.items, k=4)
# Fix: pass save_models3 (this cell's callback). The original referenced
# save_models2 from an earlier cell, so this run's checkpoints were never
# saved by its own SaveModelsBatch instance.
learn3 = Learner(dls3, model3, KalmanLoss(only_gap=True), cbs = [Float64Callback, save_models3], metrics=rmse_gap)
learn3.fit(20, 1e-4)
learn3.recorder.plot_loss()
torch.save(learn3.model, "trained_2_feb_gap_partial_var_cond_v1.pickle")
# learnB.export("trained_2_feb_gap_all_var_v1")
show_results(learn3, items=items)